/*
 * Device-not-available (#NM) fault handler: hand the FPU to the guest.
 * Clears host CR0.TS, restores the vcpu's FPU context, and clears the
 * guest's view of CR0.TS so the guest does not immediately re-fault.
 * NOTE(review): this hunk ends mid-function; the closing brace lies
 * outside the visible region.
 */
static void vmx_do_no_device_fault(void)
{
    unsigned long cr0;
    struct vcpu *v = current;

    clts();              /* clear host CR0.TS so FPU instructions do not trap */
    setup_fpu(current);  /* restore this vcpu's FPU state */

    /* Only clear TS in GUEST_CR0 if the guest did not set TS itself. */
    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
    if (!(cr0 & X86_CR0_TS)) {
        __vmread_vcpu(v, GUEST_CR0, &cr0);
        cr0 &= ~X86_CR0_TS;
        __vmwrite(GUEST_CR0, cr0);
    }
{
    unsigned int gp, cr;
    unsigned long value;
    struct vcpu *v = current;

    switch (exit_qualification & CONTROL_REG_ACCESS_TYPE) {
    case TYPE_MOV_TO_CR:
        /*
         * NOTE(review): diff context between hunks is elided here; the
         * clts()/setup_fpu() sequence below likely belongs to a TYPE_CLTS
         * case in the complete file — confirm against upstream.
         */
        clts();
        setup_fpu(current);

        /* Clear TS in both the guest CR0 and its read shadow. */
        __vmread_vcpu(v, GUEST_CR0, &value);
        value &= ~X86_CR0_TS; /* clear TS */
        __vmwrite(GUEST_CR0, value);

        __vmread_vcpu(v, CR0_READ_SHADOW, &value);
        value &= ~X86_CR0_TS; /* clear TS */
        __vmwrite(CR0_READ_SHADOW, value);
        break;

    case TYPE_LMSW:
        TRACE_VMEXIT(1,TYPE_LMSW);
        /* LMSW updates only the low 4 bits (PE/MP/EM/TS) of CR0. */
        __vmread_vcpu(v, CR0_READ_SHADOW, &value);
        value = (value & ~0xF) |
            (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
        return vmx_set_cr0(value);
    }
-static always_inline void __vmwrite_vcpu(unsigned long field, unsigned long value)
+static always_inline void __vmwrite_vcpu(struct vcpu *v, unsigned long field, unsigned long value)
{
- struct vcpu *v = current;
-
switch(field) {
case CR0_READ_SHADOW:
v->arch.arch_vmx.cpu_shadow_cr0 = value;
}
}
-static always_inline void __vmread_vcpu(unsigned long field, unsigned long *value)
+static always_inline void __vmread_vcpu(struct vcpu *v, unsigned long field, unsigned long *value)
{
- struct vcpu *v = current;
-
switch(field) {
case CR0_READ_SHADOW:
*value = v->arch.arch_vmx.cpu_shadow_cr0;
printk("__vmread_cpu: invalid field %lx\n", field);
break;
}
-
- /*
- * __vmwrite() can be used for non-current vcpu, and it's possible that
- * the vcpu field is not initialized at that case.
- *
- */
- if (!*value) {
- __vmread(field, value);
- __vmwrite_vcpu(field, *value);
- }
}
static inline int __vmwrite (unsigned long field, unsigned long value)
{
unsigned long eflags;
+ struct vcpu *v = current;
__asm__ __volatile__ ( VMWRITE_OPCODE
- MODRM_EAX_ECX
+ MODRM_EAX_ECX
:
: "a" (field) , "c" (value)
: "memory");
case CR0_READ_SHADOW:
case GUEST_CR0:
case CPU_BASED_VM_EXEC_CONTROL:
- __vmwrite_vcpu(field, value);
+ __vmwrite_vcpu(v, field, value);
break;
}
static inline void vmx_stts(void)
{
unsigned long cr0;
+ struct vcpu *v = current;
- __vmread_vcpu(GUEST_CR0, &cr0);
+ __vmread_vcpu(v, GUEST_CR0, &cr0);
if (!(cr0 & X86_CR0_TS)) {
__vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
}
- __vmread_vcpu(CR0_READ_SHADOW, &cr0);
+ __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
if (!(cr0 & X86_CR0_TS))
__vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
}
{
    unsigned long cr0;

    /*
     * Guest paging is considered enabled when both PE and PG are set in
     * the shadow CR0.
     * NOTE(review): 'v' is declared in elided context above this hunk —
     * confirm the enclosing function header against the complete file.
     */
    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
}